* @buf = hypervisor buffer
* @addr = guest virtual or physical address to copy to/from
* @size = number of bytes to copy
- * @dir = HVM_COPY_IN / HVM_COPY_OUT
- * @phy = interpret addr as physical or virtual address?
- * Returns TRUE on success.
+ * @dir = copy *to* guest (TRUE) or *from* guest (FALSE)?
+ * @phy = interpret addr as physical (TRUE) or virtual (FALSE) address?
+ * Returns number of bytes that failed to copy (0 == complete success).
*/
static int __hvm_copy(
void *buf, unsigned long addr, int size, int dir, int phy)
struct vcpu *v = current;
unsigned long mfn;
char *p;
- int count;
+ int count, todo;
- while ( size > 0 )
+ todo = size;
+ while ( todo > 0 )
{
- count = min_t(int, PAGE_SIZE - (addr & ~PAGE_MASK), size);
+ count = min_t(int, PAGE_SIZE - (addr & ~PAGE_MASK), todo);
mfn = phy ?
get_mfn_from_gpfn(addr >> PAGE_SHIFT) :
mfn_x(sh_vcpu_gfn_to_mfn(v, shadow_gva_to_gfn(v, addr)));
if ( mfn == INVALID_MFN )
- return 0;
+ return todo;
p = (char *)map_domain_page(mfn) + (addr & ~PAGE_MASK);
- if ( dir == HVM_COPY_IN )
- memcpy(buf, p, count);
+ if ( dir )
+ memcpy(p, buf, count); /* dir == TRUE: *to* guest */
else
- memcpy(p, buf, count);
+ memcpy(buf, p, count); /* dir == FALSE: *from* guest */
unmap_domain_page(p);
addr += count;
buf += count;
- size -= count;
+ todo -= count;
}
- return 1;
+ return 0;
}
-int hvm_copy_phy(void *buf, unsigned long paddr, int size, int dir)
+int hvm_copy_to_guest_phys(unsigned long paddr, void *buf, int size)
{
- return __hvm_copy(buf, paddr, size, dir, 1);
+ return __hvm_copy(buf, paddr, size, 1, 1);
}
-int hvm_copy(void *buf, unsigned long vaddr, int size, int dir)
+int hvm_copy_from_guest_phys(void *buf, unsigned long paddr, int size)
{
- return __hvm_copy(buf, vaddr, size, dir, 0);
+ return __hvm_copy(buf, paddr, size, 0, 1);
+}
+
+int hvm_copy_to_guest_virt(unsigned long vaddr, void *buf, int size)
+{
+ return __hvm_copy(buf, vaddr, size, 1, 0);
+}
+
+int hvm_copy_from_guest_virt(void *buf, unsigned long vaddr, int size)
+{
+ return __hvm_copy(buf, vaddr, size, 0, 0);
}
/*
}
pic = &v->domain->arch.hvm_domain.vpic;
if ( p->dir == 0 ) {
- if(p->pdata_valid)
- hvm_copy(&data, (unsigned long)p->u.pdata, p->size, HVM_COPY_IN);
+ if (p->pdata_valid)
+ (void)hvm_copy_from_guest_virt(
+ &data, (unsigned long)p->u.pdata, p->size);
else
data = p->u.data;
spin_lock_irqsave(&pic->lock, flags);
data = pic_ioport_read(
(void*)&pic->pics[p->addr>>7], (uint32_t) p->addr);
spin_unlock_irqrestore(&pic->lock, flags);
- if(p->pdata_valid)
- hvm_copy(&data, (unsigned long)p->u.pdata, p->size, HVM_COPY_OUT);
+ if (p->pdata_valid)
+ (void)hvm_copy_to_guest_virt(
+ (unsigned long)p->u.pdata, &data, p->size);
else
p->u.data = (u64)data;
}
s = &v->domain->arch.hvm_domain.vpic;
if ( p->dir == 0 ) {
- if(p->pdata_valid)
- hvm_copy(&data, (unsigned long)p->u.pdata, p->size, HVM_COPY_IN);
+ if (p->pdata_valid)
+ (void)hvm_copy_from_guest_virt(
+ &data, (unsigned long)p->u.pdata, p->size);
else
data = p->u.data;
spin_lock_irqsave(&s->lock, flags);
else {
data = (u64) elcr_ioport_read(
(void*)&s->pics[p->addr&1], (uint32_t) p->addr);
- if(p->pdata_valid)
- hvm_copy(&data, (unsigned long)p->u.pdata, p->size, HVM_COPY_OUT);
+ if (p->pdata_valid)
+ (void)hvm_copy_to_guest_virt(
+ (unsigned long)p->u.pdata, &data, p->size);
else
p->u.data = (u64)data;
data = read_handler(v,
req->addr + (sign * i * req->size),
req->size);
- hvm_copy(&data,
- (unsigned long)p->u.pdata + (sign * i * req->size),
- p->size,
- HVM_COPY_OUT);
+ (void)hvm_copy_to_guest_virt(
+ (unsigned long)p->u.pdata + (sign * i * req->size),
+ &data,
+ p->size);
}
} else { /* !req->dir == IOREQ_READ */
for (i = 0; i < req->count; i++) {
- hvm_copy(&data,
- (unsigned long)p->u.pdata + (sign * i * req->size),
- p->size,
- HVM_COPY_IN);
+ (void)hvm_copy_from_guest_virt(
+ &data,
+ (unsigned long)p->u.pdata + (sign * i * req->size),
+ p->size);
write_handler(v,
req->addr + (sign * i * req->size),
req->size, data);
addr += regs->es << 4;
if (sign > 0)
addr -= p->size;
- hvm_copy(&p->u.data, addr, p->size, HVM_COPY_OUT);
+ (void)hvm_copy_to_guest_virt(addr, &p->u.data, p->size);
}
}
else /* p->dir == IOREQ_WRITE */
if (sign > 0)
addr -= p->size;
- hvm_copy(&p->u.data, addr, p->size, HVM_COPY_OUT);
+ (void)hvm_copy_to_guest_virt(addr, &p->u.data, p->size);
}
if (mmio_opp->flags & REPZ)
{
if (inst_len > MAX_INST_LEN || inst_len <= 0)
return 0;
- if (!hvm_copy(buf, guest_eip, inst_len, HVM_COPY_IN))
+ if (hvm_copy_from_guest_virt(buf, guest_eip, inst_len))
return 0;
return inst_len;
}
regs->eip -= inst_len; /* do not advance %eip */
if (dir == IOREQ_WRITE)
- hvm_copy(&value, addr, size, HVM_COPY_IN);
+ (void)hvm_copy_from_guest_virt(&value, addr, size);
send_mmio_req(IOREQ_TYPE_COPY, gpa, 1, size, value, dir, 0);
} else {
if ((addr & PAGE_MASK) != ((addr + sign * (count * size - 1)) & PAGE_MASK)) {
return 0;
}
- return !hvm_copy((void *)from, (unsigned long)to, len, HVM_COPY_OUT);
+ return hvm_copy_to_guest_virt((unsigned long)to, (void *)from, len);
}
unsigned long copy_from_user_hvm(void *to, const void *from, unsigned len)
return 0;
}
- return !hvm_copy(to, (unsigned long)from, len, HVM_COPY_IN);
+ return hvm_copy_from_guest_virt(to, (unsigned long)from, len);
}
/*
pio_opp->flags |= OVERLAP;
if (dir == IOREQ_WRITE)
- hvm_copy(&value, addr, size, HVM_COPY_IN);
+ (void)hvm_copy_from_guest_virt(&value, addr, size);
send_pio_req(regs, port, 1, size, value, dir, 0);
}
ptr = eip & ~0xff;
len = 0;
- if (hvm_copy(opcode, ptr, sizeof(opcode), HVM_COPY_IN))
+ if (hvm_copy_from_guest_virt(opcode, ptr, sizeof(opcode)) == 0)
len = sizeof(opcode);
printf("Code bytes around(len=%d) %lx:", len, eip);
pio_opp->flags |= OVERLAP;
if (dir == IOREQ_WRITE)
- hvm_copy(&value, addr, size, HVM_COPY_IN);
+ (void)hvm_copy_from_guest_virt(&value, addr, size);
send_pio_req(regs, port, 1, size, value, dir, 0);
} else {
if ((addr & PAGE_MASK) != ((addr + count * size - 1) & PAGE_MASK)) {
u32 cp;
/* make sure vmxassist exists (this is not an error) */
- if (!hvm_copy_phy(&magic, VMXASSIST_MAGIC_OFFSET, sizeof(magic), HVM_COPY_IN))
+ if (hvm_copy_from_guest_phys(&magic, VMXASSIST_MAGIC_OFFSET,
+ sizeof(magic)))
return 0;
if (magic != VMXASSIST_MAGIC)
return 0;
*/
case VMX_ASSIST_INVOKE:
/* save the old context */
- if (!hvm_copy_phy(&cp, VMXASSIST_OLD_CONTEXT, sizeof(cp), HVM_COPY_IN))
+ if (hvm_copy_from_guest_phys(&cp, VMXASSIST_OLD_CONTEXT, sizeof(cp)))
goto error;
if (cp != 0) {
if (!vmx_world_save(v, &c))
goto error;
- if (!hvm_copy_phy(&c, cp, sizeof(c), HVM_COPY_OUT))
+ if (hvm_copy_to_guest_phys(cp, &c, sizeof(c)))
goto error;
}
/* restore the new context, this should activate vmxassist */
- if (!hvm_copy_phy(&cp, VMXASSIST_NEW_CONTEXT, sizeof(cp), HVM_COPY_IN))
+ if (hvm_copy_from_guest_phys(&cp, VMXASSIST_NEW_CONTEXT, sizeof(cp)))
goto error;
if (cp != 0) {
- if (!hvm_copy_phy(&c, cp, sizeof(c), HVM_COPY_IN))
+ if (hvm_copy_from_guest_phys(&c, cp, sizeof(c)))
goto error;
if (!vmx_world_restore(v, &c))
goto error;
*/
case VMX_ASSIST_RESTORE:
/* save the old context */
- if (!hvm_copy_phy(&cp, VMXASSIST_OLD_CONTEXT, sizeof(cp), HVM_COPY_IN))
+ if (hvm_copy_from_guest_phys(&cp, VMXASSIST_OLD_CONTEXT, sizeof(cp)))
goto error;
if (cp != 0) {
- if (!hvm_copy_phy(&c, cp, sizeof(c), HVM_COPY_IN))
+ if (hvm_copy_from_guest_phys(&c, cp, sizeof(c)))
goto error;
if (!vmx_world_restore(v, &c))
goto error;
// It entirely ignores the permissions in the page tables.
// In this case, that is only a user vs supervisor access check.
//
- if ( hvm_copy(val, addr, bytes, HVM_COPY_IN) )
+ if ( hvm_copy_from_guest_virt(val, addr, bytes) == 0 )
{
#if 0
struct vcpu *v = current;
// In this case, that includes user vs supervisor, and
// write access.
//
- if ( hvm_copy(&val, addr, bytes, HVM_COPY_OUT) )
+ if ( hvm_copy_to_guest_virt(addr, &val, bytes) == 0 )
return X86EMUL_CONTINUE;
/* If we got here, there was nothing mapped here, or a bad GFN
extern int hvm_enabled;
-enum { HVM_COPY_IN = 0, HVM_COPY_OUT };
-extern int hvm_copy(void *buf, unsigned long vaddr, int size, int dir);
-extern int hvm_copy_phy(void *buf, unsigned long vaddr, int size, int dir);
-
-extern void hvm_setup_platform(struct domain* d);
-extern int hvm_mmio_intercept(ioreq_t *p);
-extern int hvm_io_intercept(ioreq_t *p, int type);
-extern int hvm_buffered_io_intercept(ioreq_t *p);
-extern void hvm_hooks_assist(struct vcpu *v);
-extern void hvm_print_line(struct vcpu *v, const char c);
-extern void hlt_timer_fn(void *data);
+int hvm_copy_to_guest_phys(unsigned long paddr, void *buf, int size);
+int hvm_copy_from_guest_phys(void *buf, unsigned long paddr, int size);
+int hvm_copy_to_guest_virt(unsigned long vaddr, void *buf, int size);
+int hvm_copy_from_guest_virt(void *buf, unsigned long vaddr, int size);
+
+void hvm_setup_platform(struct domain* d);
+int hvm_mmio_intercept(ioreq_t *p);
+int hvm_io_intercept(ioreq_t *p, int type);
+int hvm_buffered_io_intercept(ioreq_t *p);
+void hvm_hooks_assist(struct vcpu *v);
+void hvm_print_line(struct vcpu *v, const char c);
+void hlt_timer_fn(void *data);
void hvm_do_hypercall(struct cpu_user_regs *pregs);